Load libraries

library(tidyverse) # data manipulation
library(ggpubr) # producing data exploratory plots
library(modelsummary) # descriptive data 
library(glmmTMB) # running generalised mixed models 
library(DHARMa) # model diagnostics 
library(performance) # model diagnostics  
library(ggeffects) # partial effect plots 
library(car) # running Anova on model 
library(emmeans) # post-hoc analysis

Import data

# Import raw respirometry results (relative paths; assumes the working
# directory is the project root).
df_adults <- read_csv("import_data/resp_results_adults2.csv")
df_jresp <- read_csv("import_data/resp_results_juveniles.csv")

Data manipulation

Adults

# Convert the experimental-design columns of the adult data to factors.
# Same conversions as before, expressed as a single across() call.
df_adults_cleaned <- df_adults |>
  mutate(across(c(FISH_ID, Sex, Population, Tank, Chamber,
                  System, Temperature, True_resting),
                factor))

# Split the cleaned adult data by sex for the tank-pair join below.
df_males   <- filter(df_adults_cleaned, Sex == "M")
df_females <- filter(df_adults_cleaned, Sex == "F")

# Pair males and females housed in the same Tank (full_join keeps tanks with
# only one sex). After the join, suffix .x = male columns, .y = female columns.
# Pair midpoints are NA when either member of the pair is missing a value.
df_adults_cleaned2 <- df_males |> 
  full_join(select(df_females, c("Tank","Temperature","Mass","Resting","Max","AAS","FISH_ID","Sex")), by="Tank") |> 
  mutate(Temperature.x = coalesce(Temperature.x, Temperature.y), # fill from the female row when the male row is absent
         FISH_ID.x = coalesce(FISH_ID.x, FISH_ID.y), # NOTE(review): for female-only tanks this puts the female ID in the male slot — confirm intended
         Sex.x = coalesce(Sex.x, Sex.y),
         Resting.midpoint = (Resting.x+Resting.y)/2, 
         Max.midpoint = (Max.x+Max.y)/2, 
         AAS.midpoint = (AAS.x+AAS.y)/2) 

Juveniles

# Harmonise the inconsistent population spellings into two canonical levels.
# (Same fct_collapse as before, expressed as a mutate pipeline.)
df_jresp <- df_jresp |>
  mutate(Population = fct_collapse(Population,
                                   `Vlassof cay` = c("Vlassof reef", "Vlassof", "Vlassof Cay", "Vlassof cay"),
                                   `Arlington reef` = c("Arlington reef","Arlginton reef")))

#df_jresp$Female <-  fct_collapse(df_jresp$Female, 
                                  #`CARL359`= c("CARL359", "CARL59")) 


# Build a parent-pair identifier (F0 = "Male_Female") and convert the design
# columns to factors.
df_jresp2 <-  df_jresp |> 
  unite("F0", c("Male","Female"), sep="_", remove=FALSE) |>
  mutate(across(1:7, factor), # positional: converts the first 7 columns — NOTE(review): fragile if column order changes; consider naming them
         Temperature = factor(Temperature), 
         True_resting = factor(True_resting)) 

#df_jresp2_rest <- df_jresp2 |> 
  #filter(True_resting == "Y")

Merging dataframes

# Attach parental phenotypes to each juvenile record:
#   temp1a/temp2a: join the male parent's traits via FISH_ID.x ( = Male ID),
#   temp1b/temp2b: join the female parent's traits via FISH_ID.y ( = Female ID),
# then merge the two on Clutch + Replicate so each row carries both parents.
temp1a <- df_jresp2 |> 
  mutate(FISH_ID.x = Male)
temp1b <- df_jresp2 |> 
  mutate(FISH_ID.y = Female)
temp2a <- temp1a |> 
  left_join(select(df_adults_cleaned2, c("FISH_ID.x",
                                          "Sex.x",
                                          "Resting.x", 
                                          "Max.x", 
                                          "AAS.x", 
                                          "Mass.x")), 
                    by="FISH_ID.x")
temp2b <- temp1b |> 
  left_join(select(df_adults_cleaned2, c("FISH_ID.y",
                            "Sex.y",
                            "Resting.y", 
                            "Max.y", 
                            "AAS.y", 
                            "Mass.y")), 
                   by="FISH_ID.y") 
# Keep only the female-side trait columns from temp2b to avoid duplicating
# the juvenile columns already present in temp2a.
df_merged <- temp2a |> 
  left_join(select(temp2b, c("Clutch","Replicate", 
                             "FISH_ID.y",
                             "Resting.y", 
                             "Max.y", 
                             "AAS.y", 
                             "Mass.y")), 
            by=c("Clutch","Replicate"))
# Final analysis data frame: rename parental traits, mass-correct resting
# rates, recompute the pair midpoint, and collapse to one row per clutch.
df <- df_merged |> 
  mutate(Resting_MALE =Resting.x, 
         Max_MALE =Max.x, 
         AAS_MALE =AAS.x, 
         Mass_MALE =Mass.x, 
         FISH_ID.y =FISH_ID.x,# NOTE(review): both ID columns now hold the male ID (FISH_ID.x); the female ID is discarded — confirm intended
         FISH_ID.x =FISH_ID.x, 
         Resting_FEMALE =Resting.y, 
         Max_FEMALE =Max.y, 
         AAS_FEMALE =AAS.y, 
         Mass_FEMALE =Mass.y) |>  
  mutate(Resting_MALE = Resting_MALE/Mass_MALE, # mass-specific resting rates
         Resting_FEMALE =Resting_FEMALE/Mass_FEMALE) |>
  mutate(Resting_MID =(Resting_MALE+Resting_FEMALE)/2) |> # easier to do it again
  filter(True_resting == "Y") |> # this should remove 10 individuals from the df
  mutate(Resting_MID =coalesce(Resting_MID, Resting_MALE)) |> # fall back to the one available parent when the other is missing
  mutate(Resting_MID =coalesce(Resting_MID, Resting_FEMALE)) |> 
  drop_na(Resting) |> 
  group_by(Clutch) |> 
  mutate(MEDIAN_Resting =median(Resting_kg_wet)) |> # clutch-level response used in the models below
  ungroup() |> 
  select(-c(Replicate, Chamber, System, Volume, Date_tested, Swim, Mass, Dry_mass, 18:26)) |> # drop per-individual columns so distinct() collapses to clutch level
  distinct() |> 
  drop_na(Resting_MID)

Exploratory analysis

Offspring-Male

# Clutch-median offspring resting rate against the male parent's resting rate,
# coloured by test temperature.
# Fix: the axis labels were swapped relative to the aesthetics —
# x = Resting_MALE is the parental-male trait and y = MEDIAN_Resting is the
# offspring trait.
plot2 <- ggplot(df, aes(x=Resting_MALE, y=MEDIAN_Resting, color=Temperature)) + 
  stat_smooth(method = "lm") +
  #geom_point(alpha=0.1) + 
  ggtitle("Offspring-male relationship") + 
  xlab("Resting (parental-male)") + 
  ylab("Resting (offspring)") +
  theme_classic() + 
  theme(legend.position = "bottom")

plot2

Offspring-Midpoint

# Clutch-median offspring resting rate against the mid-parent resting rate,
# coloured by test temperature.
# Fix: the axis labels were swapped relative to the aesthetics —
# x = Resting_MID is the parental midpoint and y = MEDIAN_Resting is the
# offspring trait.
plot <- ggplot(df, aes(x=Resting_MID, y=MEDIAN_Resting, color=Temperature)) + 
  stat_smooth(method = "lm") +
  #geom_point(alpha=0.1) + 
  ggtitle("Offspring-midpoint relationship") + 
  xlab("Resting (parental-midpoint)") + ylab("Resting (offspring)") +
  theme_classic() + 
  theme(legend.position = 'right')

plot

Descriptive statistics

Juveniles - overview

Overview

tinytable_kslz6meiilcrqfsgjldj
Population 27 28.5 30
Arlington reef 7 6 3
Pretty patches 4 3 5
Sudbury reef 4 2 2
Vlassof cay 4 0 4
# Counts of juveniles per parental pair (F0) at each test temperature.
datasummary(Factor(F0) ~ Factor(Temperature), 
            data = df, 
            fmt = "%.0f")
tinytable_rms0qs8j7w8y2hq7a972
F0 27 28.5 30
CARL217_CARL226 0 1 0
CARL218_CARL222 0 0 2
CARL230_CARL235 0 0 0
CARL233_CARL215 0 0 0
CARL237_CARL219 2 0 0
CARL241_CARL239 2 0 0
CARL249_CARL360 0 0 1
CARL335_CARL359 0 2 0
CARL338_CARL345 0 1 0
CARL344_CARL370 0 0 0
CARL354_CARL355 3 0 0
CARL360_CARL249 0 0 0
CARL367_CARL363 0 1 0
CARL369_CARL349 0 1 0
CPRE189_CPRE202 0 0 2
CPRE372_CPRE209 1 0 0
CPRE372_CPRE370 1 0 0
CPRE375_CPRE377 2 0 0
CPRE391_CPRE390 0 0 1
CPRE447_CPRE452 0 0 2
CPRE453_CPRE459 0 1 0
CPRE521_CPRE524 0 1 0
CPRE550_CPRE533 0 1 0
CSUD002_CSUD213 0 1 0
CSUD009_CSUD212 2 0 0
CSUD013_CSUD017 2 0 0
CSUD016_CSUD078 0 1 0
CSUD312_CSUD304 0 0 2
CVLA049_CVLA098 0 0 0
CVLA089_CVLA059 0 0 1
CVLA102_CVLA466 1 0 0
CVLA106_CVLA091 0 0 2
CVLA468_CVLA477 2 0 0
CVLA486_CVLA463 1 0 0
CVLA498_CVLA493 0 0 1

Juveniles

Resting oxygen uptake

tinytable_hegxzud2njq5kvokj6xb
Temperature NUnique mean median min max sd Histogram
27 9 6.57 6.14 4.18 10.09 1.82 ▅▂▃▇▃▇▂
28.5 10 6.07 5.71 4.40 8.49 1.40 ▇▂▂▅▂▅▂
30 6 6.73 6.18 5.14 9.15 1.46 ▅▂▇▅▅

Adults - overview

Overview

# Counts of adults per population at each test temperature.
datasummary(Factor(Population) ~ Factor(Temperature), 
            data = df_adults_cleaned, 
            fmt = "%.0f")
tinytable_b6tv9djkz6pdwbokfo1k
Population 27 28.5 30
Arlington reef 8 7 4
Pretty patches 4 6 4
Sudbury reef 4 3 2
Vlassof cay 6 2 5
# Counts of adults per population, cross-classified by temperature and sex.
datasummary(Factor(Population) ~ Factor(Temperature)*Factor(Sex), 
            data = df_adults_cleaned, 
            fmt = "%.0f")
tinytable_82zcdrdvzsflsabtfmdw
27 28.5 30
Population F M F M F M
Arlington reef 4 4 2 5 2 2
Pretty patches 2 2 3 3 3 1
Sudbury reef 2 2 1 2 1 1
Vlassof cay 3 3 1 1 3 2

Pairs

# Number of unique breeding pairs (via AAS.midpoint) per population x temperature.
datasummary(Factor(Population)*Factor(Temperature.x) ~ AAS.midpoint*(NUnique), 
            data = df_adults_cleaned2, 
            fmt = "%.0f")
tinytable_hcvyhjjztj8i6v6qp6fq
Population Temperature.x NUnique
Arlington reef 27 3
28.5 2
30 1
Pretty patches 27 2
28.5 3
30 1
Sudbury reef 27 2
28.5 2
30 1
Vlassof cay 27 3
28.5 1
30 2

Adults

Resting oxygen uptake

tinytable_4966jmcn5pi4qv5awdze
Temperature NUnique mean median min max sd Histogram
27 19 6.36 6.14 3.82 10.09 1.67 ▃▃▅▇▃▃▂▂▂▂
28.5 16 6.29 6.23 4.35 8.49 1.41 ▇▂▅▂▃▃▃▂
30 14 7.17 6.86 5.14 9.15 1.43 ▅▂▇▂▂▂▇▅

Fit Models [random factors]

# Baseline: intercept-only model with no random effects.
model1 <- glmmTMB(MEDIAN_Resting ~ 1, 
                  family="gaussian",
                  data = df) 

# Candidate: random intercept for Population.
model2 <- glmmTMB(MEDIAN_Resting ~ (1|Population), 
                  family="gaussian",
                  data = df)
Model selection
# Compare the two by information criteria (k=2 is the default AIC penalty).
AIC(model1, model2, k=2)
BIC(model1, model2)

Model 1 performs the best; therefore, only Clutch will be considered as a random factor in future models. (Note: Population was the random effect tested here, and it was not supported.)

Relationships

Offspring-Male

Fit model [fixed factors]

After figuring out which random factors will be incorporated into the model, we will start to examine our fixed factors. Some fixed factors, such as Resting_(FE)MALE and TEMPERATURE, will be essential to answering the questions we have around heritability. Another factor that will be included is Dry_mass — which, it should be pointed out, in this experiment refers to the mass of fish after they were blotted dry with paper towel rather than completely dried out. Larger fish consume more oxygen; therefore, we need to account for this known relationship within our model. Our model will look something like this:

MEDIAN_Resting ~ Resting_MALE*Temperature + scale(Dry_mass)

If we had alternative hypotheses to test, we would do so at this stage. But in this instance the experiment was designed to answer a specific question by limiting potential covariates.

# Fixed-effects model: clutch-median offspring resting rate as a function of
# the male parent's (scaled) resting rate, test temperature, and their
# interaction. Gaussian family with identity link.
model1.1 <- glmmTMB(
  MEDIAN_Resting ~ scale(Resting_MALE) * Temperature,
  data = df,
  family = gaussian()
)

Great, now let's check how our model performed via model validation techniques.

Model validation

To check our model's performance we will be using two different packages that perform model diagnostics. The packages used here are just examples; there are other packages out there that can provide the same function.

DHARMa

# Simulation-based residual diagnostics (DHARMa): scaled residuals vs. the
# fitted model's simulated distribution.
model1.1 |> 
  simulateResiduals(plot=TRUE) 

## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help. 
##  
## Scaled residual values: 0.056 0.04 0.28 0.212 0.788 0.184 0.188 0.956 0.708 0.156 0.188 0.376 0.712 0.936 0.968 0.572 0.8 0.66 0.852 0.18 ...
# Formal tests: uniformity (KS), dispersion, and outliers.
model1.1 |> 
  testResiduals(plot=TRUE)

## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.12211, p-value = 0.6226
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 1.0253, p-value = 0.84
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 0, observations = 38, p-value = 1
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.00000000 0.09251276
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                                      0
## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.12211, p-value = 0.6226
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 1.0253, p-value = 0.84
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 0, observations = 38, p-value = 1
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.00000000 0.09251276
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                                      0

performance

# Visual model checks from {performance}.
model1.1 |> check_model(detrend=FALSE)
## `check_outliers()` does not yet support models of class `glmmTMB`.

Partial effect plots

# Partial (marginal) effect of male resting rate within each temperature.
model1.1 |> ggemmeans(~Resting_MALE|Temperature) |> 
  plot(add.data =FALSE)

Model investigations

summary

model1.1 |> summary()
##  Family: gaussian  ( identity )
## Formula:          MEDIAN_Resting ~ scale(Resting_MALE) * Temperature
## Data: df
## 
##      AIC      BIC   logLik deviance df.resid 
##    410.5    421.9   -198.2    396.5       31 
## 
## 
## Dispersion estimate for gaussian family (sigma^2): 1.99e+03 
## 
## Conditional model:
##                                     Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                          360.658     13.508  26.699  < 2e-16 ***
## scale(Resting_MALE)                   14.569     15.140   0.962  0.33591    
## Temperature28.5                       37.527     19.191   1.955  0.05053 .  
## Temperature30                         67.010     22.087   3.034  0.00241 ** 
## scale(Resting_MALE):Temperature28.5   -6.427     23.361  -0.275  0.78324    
## scale(Resting_MALE):Temperature30    -35.345     20.218  -1.748  0.08042 .  
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

ANOVA

model1.1 |> Anova()

confint

model1.1 |> confint()
##                                            2.5 %     97.5 %   Estimate
## (Intercept)                         334.18265949 387.133802 360.658231
## scale(Resting_MALE)                 -15.10465354  44.242130  14.568738
## Temperature28.5                      -0.08588536  75.139531  37.526823
## Temperature30                        23.71964274 110.300830  67.010237
## scale(Resting_MALE):Temperature28.5 -52.21242315  39.359227  -6.426598
## scale(Resting_MALE):Temperature30   -74.97119345   4.280644 -35.345275

r-squared

model1.1 |> r2_nakagawa()
## Random effect variances not available. Returned R2 does not account for random effects.
## # R2 for Mixed Models
## 
##   Conditional R2: NA
##      Marginal R2: 0.313

Pairwise comparisons

emmeans [Temperature]

# Estimated marginal means by temperature with Sidak-adjusted pairwise
# contrasts (note the interaction caveat emmeans prints).
model1.1 |> emmeans(pairwise ~ Temperature, type="response") |> 
  summary(by=NULL, adjust="sidak", infer=TRUE)
## NOTE: Results may be misleading due to involvement in interactions
## $emmeans
##  Temperature emmean   SE df lower.CL upper.CL t.ratio p.value
##  27             361 13.5 31      327      395  26.699  <.0001
##  28.5           398 13.6 31      364      433  29.212  <.0001
##  30             428 17.5 31      384      472  24.473  <.0001
## 
## Confidence level used: 0.95 
## Conf-level adjustment: sidak method for 3 estimates 
## P value adjustment: sidak method for 3 tests 
## 
## $contrasts
##  contrast                        estimate   SE df lower.CL upper.CL t.ratio
##  Temperature27 - Temperature28.5    -37.5 19.2 31    -86.0     10.9  -1.955
##  Temperature27 - Temperature30      -67.0 22.1 31   -122.8    -11.3  -3.034
##  Temperature28.5 - Temperature30    -29.5 22.2 31    -85.4     26.4  -1.330
##  p.value
##   0.1683
##   0.0145
##   0.4747
## 
## Confidence level used: 0.95 
## Conf-level adjustment: sidak method for 3 estimates 
## P value adjustment: sidak method for 3 tests

Summary figure

# Prediction grid over a range of male resting rates (106-241 in steps of 1).
om.rest <- emmeans(model1.1, ~Resting_MALE*Temperature, 
                   at =list(Resting_MALE =seq(from=106, to =241, by=1)))

om.rest.df <- as.data.frame(om.rest)

# Partial residuals for plotting: population-level fitted value plus the
# response-scale residual for each observation.
om.rest.obs <- drop_na(df, Resting_MALE, MEDIAN_Resting) |> 
  mutate(Pred =predict(model1.1, re.form =NA, type='response'), 
         Resid =residuals(model1.1, type ="response"), 
         Fit =Pred + Resid) 

# Clutch-level summary of partial residuals: mean +/- 95% CI per
# Clutch x Temperature cell.
# Fixes: (1) qt() with df = 0 produced NaN-with-warning for singleton groups
# (the "NaNs produced" warnings in the original run) — now returns NA
# explicitly when n.rest < 2; (2) .groups = "drop" replaces the trailing
# ungroup() and silences the grouped-output message.
om.rest.obs.summarize <- om.rest.obs |> 
  group_by(Clutch, Temperature) |> 
  summarise(mean.rest = mean(Fit, na.rm = TRUE),
            mean.rest_male = mean(Resting_MALE, na.rm = TRUE),
            sd.rest = sd(Fit, na.rm = TRUE), 
            n.rest = n(),
            .groups = "drop") |> 
  mutate(se.rest = sd.rest / sqrt(n.rest), 
         # critical t value; pmax() keeps qt()'s df argument valid, if_else()
         # masks groups too small for a CI
         t.crit = if_else(n.rest > 1, qt(1 - (0.05/2), pmax(n.rest - 1, 1)), NA_real_),
         lower.ci.rest = mean.rest - t.crit * se.rest, 
         upper.ci.rest = mean.rest + t.crit * se.rest)
## `summarise()` has grouped output by 'Clutch'. You can override using the
## `.groups` argument.
## Warning: There were 76 warnings in `mutate()`.
## The first warning was:
## ℹ In argument: `lower.ci.rest = mean.rest - qt(1 - (0.05/2), n.rest - 1) *
##   se.rest`.
## ℹ In group 1: `Clutch = 38`.
## Caused by warning in `qt()`:
## ! NaNs produced
## ℹ Run `dplyr::last_dplyr_warnings()` to see the 75 remaining warnings.
# Model-predicted trend per temperature with clutch-level observed means
# (+/- 95% CI) overlaid, one panel per temperature.
ggplot(om.rest.df, aes(x = Resting_MALE, y = emmean)) +
  stat_smooth(aes(color = Temperature), method = "lm") +
  geom_pointrange(
    data = om.rest.obs.summarize,
    aes(x = mean.rest_male,
        y = mean.rest,
        ymin = lower.ci.rest,
        ymax = upper.ci.rest,
        color = Temperature),
    alpha = 0.2
  ) +
  facet_wrap(~Temperature) +
  theme_classic() +
  theme(legend.position = "bottom")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 17 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Warning: Removed 11 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Warning: Removed 10 rows containing missing values or values outside the scale range
## (`geom_segment()`).

Offspring-midpoint

Fit model [fixed factors]

# Midpoint model: clutch-median offspring resting rate as a function of the
# (scaled) mid-parent resting rate, temperature, and their interaction.
mid_model1.1 <- glmmTMB(
  MEDIAN_Resting ~ scale(Resting_MID) * Temperature,
  data = df,
  family = "gaussian"
)

Great, now let's check how our model performed via model validation techniques.

Model validation

To check our model's performance we will be using two different packages that perform model diagnostics. The packages used here are just examples; there are other packages out there that can provide the same function.

DHARMa

# Simulation-based residual diagnostics (DHARMa) for the midpoint model.
mid_model1.1 |> 
  simulateResiduals(plot=TRUE) 

## Object of Class DHARMa with simulated residuals based on 250 simulations with refit = FALSE . See ?DHARMa::simulateResiduals for help. 
##  
## Scaled residual values: 0.036 0.504 0.056 0.212 0.34 0.064 0.704 0.172 0.232 0.196 0.952 0.704 0.308 0.164 0.364 0.812 0.928 0.792 0.524 0.968 ...
# Formal tests: uniformity (KS), dispersion, and outliers.
mid_model1.1 |> 
  testResiduals(plot=TRUE)

## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.11309, p-value = 0.6269
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 1.0226, p-value = 0.808
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 0, observations = 44, p-value = 1
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.00000000 0.08041994
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                                      0
## $uniformity
## 
##  Asymptotic one-sample Kolmogorov-Smirnov test
## 
## data:  simulationOutput$scaledResiduals
## D = 0.11309, p-value = 0.6269
## alternative hypothesis: two-sided
## 
## 
## $dispersion
## 
##  DHARMa nonparametric dispersion test via sd of residuals fitted vs.
##  simulated
## 
## data:  simulationOutput
## dispersion = 1.0226, p-value = 0.808
## alternative hypothesis: two.sided
## 
## 
## $outliers
## 
##  DHARMa outlier test based on exact binomial test with approximate
##  expectations
## 
## data:  simulationOutput
## outliers at both margin(s) = 0, observations = 44, p-value = 1
## alternative hypothesis: true probability of success is not equal to 0.007968127
## 95 percent confidence interval:
##  0.00000000 0.08041994
## sample estimates:
## frequency of outliers (expected: 0.00796812749003984 ) 
##                                                      0

performance

# Visual model checks from {performance}.
mid_model1.1 |> check_model(detrend=FALSE)
## `check_outliers()` does not yet support models of class `glmmTMB`.

Partial effect plots

# Partial (marginal) effect of the mid-parent resting rate within each temperature.
mid_model1.1 |> ggemmeans(~Resting_MID|Temperature) |> 
  plot(add.data =FALSE)

Model investigations

summary

mid_model1.1 |> summary()
##  Family: gaussian  ( identity )
## Formula:          MEDIAN_Resting ~ scale(Resting_MID) * Temperature
## Data: df
## 
##      AIC      BIC   logLik deviance df.resid 
##    473.7    486.2   -229.8    459.7       37 
## 
## 
## Dispersion estimate for gaussian family (sigma^2): 2.02e+03 
## 
## Conditional model:
##                                    Estimate Std. Error z value Pr(>|z|)    
## (Intercept)                        354.8089    15.8638  22.366   <2e-16 ***
## scale(Resting_MID)                   2.5967    16.8130   0.154   0.8773    
## Temperature28.5                     42.7136    20.9443   2.039   0.0414 *  
## Temperature30                       52.3823    24.1355   2.170   0.0300 *  
## scale(Resting_MID):Temperature28.5  13.2855    24.5443   0.541   0.5883    
## scale(Resting_MID):Temperature30    -0.5295    22.7682  -0.023   0.9814    
## ---
## Signif. codes:  0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1

ANOVA

mid_model1.1 |> Anova()

confint

mid_model1.1 |> confint()
##                                         2.5 %    97.5 %    Estimate
## (Intercept)                        323.716338 385.90140 354.8088679
## scale(Resting_MID)                 -30.356162  35.54963   2.5967353
## Temperature28.5                      1.663573  83.76364  42.7136077
## Temperature30                        5.077517  99.68706  52.3822901
## scale(Resting_MID):Temperature28.5 -34.820367  61.39137  13.2855020
## scale(Resting_MID):Temperature30   -45.154296  44.09532  -0.5294903

r-squared

mid_model1.1 |> r2_nakagawa()
## Random effect variances not available. Returned R2 does not account for random effects.
## # R2 for Mixed Models
## 
##   Conditional R2: NA
##      Marginal R2: 0.265

Summary figure

# Prediction grid over a range of mid-parent resting rates (100-260, step 1).
om.rest <- emmeans(mid_model1.1, ~Resting_MID*Temperature, 
                   at =list(Resting_MID =seq(from=100, to =260, by=1)))

om.rest.df <- as.data.frame(om.rest)

# Partial residuals for plotting: population-level fitted value plus the
# response-scale residual for each observation.
om.rest.obs <- drop_na(df, Resting_MID, MEDIAN_Resting) |> 
  mutate(Pred =predict(mid_model1.1, re.form =NA, type='response'), 
         Resid =residuals(mid_model1.1, type ="response"), 
         Fit =Pred + Resid) 

# Clutch-level summary of partial residuals: mean +/- 95% CI per
# Clutch x Temperature cell.
# Fixes: (1) qt() with df = 0 produced NaN-with-warning for singleton groups
# (the "NaNs produced" warnings in the original run) — now returns NA
# explicitly when n.rest < 2; (2) .groups = "drop" replaces the trailing
# ungroup() and silences the grouped-output message.
om.rest.obs.summarize <- om.rest.obs |> 
  group_by(Clutch, Temperature) |> 
  summarise(mean.rest = mean(Fit, na.rm = TRUE),
            # NOTE(review): the name says "female" but this averages the
            # mid-parent value (Resting_MID) — kept for downstream plotting code
            mean.rest_female = mean(Resting_MID, na.rm = TRUE),
            sd.rest = sd(Fit, na.rm = TRUE), 
            n.rest = n(),
            .groups = "drop") |> 
  mutate(se.rest = sd.rest / sqrt(n.rest), 
         # critical t value; pmax() keeps qt()'s df argument valid, if_else()
         # masks groups too small for a CI
         t.crit = if_else(n.rest > 1, qt(1 - (0.05/2), pmax(n.rest - 1, 1)), NA_real_),
         lower.ci.rest = mean.rest - t.crit * se.rest, 
         upper.ci.rest = mean.rest + t.crit * se.rest)
## `summarise()` has grouped output by 'Clutch'. You can override using the
## `.groups` argument.
## Warning: There were 88 warnings in `mutate()`.
## The first warning was:
## ℹ In argument: `lower.ci.rest = mean.rest - qt(1 - (0.05/2), n.rest - 1) *
##   se.rest`.
## ℹ In group 1: `Clutch = 38`.
## Caused by warning in `qt()`:
## ! NaNs produced
## ℹ Run `dplyr::last_dplyr_warnings()` to see the 87 remaining warnings.
# Model-predicted trend per temperature with clutch-level observed means
# (+/- 95% CI) overlaid, one panel per temperature.
ggplot(om.rest.df, aes(x = Resting_MID, y = emmean)) +
  stat_smooth(aes(color = Temperature), method = "lm") +
  geom_pointrange(
    data = om.rest.obs.summarize,
    aes(x = mean.rest_female,
        y = mean.rest,
        ymin = lower.ci.rest,
        ymax = upper.ci.rest,
        color = Temperature),
    alpha = 0.2
  ) +
  facet_wrap(~Temperature) +
  theme_classic() +
  theme(legend.position = "bottom")
## `geom_smooth()` using formula = 'y ~ x'
## Warning: Removed 19 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Warning: Removed 11 rows containing missing values or values outside the scale range
## (`geom_segment()`).
## Warning: Removed 14 rows containing missing values or values outside the scale range
## (`geom_segment()`).